# I load the needed libraries
library(dplyr)
library(scales)
library(GoFKernel)
library(mvtnorm)
library(gplots)
options(warn=-1)
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
Loading required package: KernSmooth
KernSmooth 2.23 loaded
Copyright M. P. Wand 1997-2009
Attaching package: ‘gplots’
The following object is masked from ‘package:stats’:
lowess
I load the functions from the class file:
# Load the shared MCMC implementations — presumably the random_steps_*
# samplers and show_results() used below; verify against ../MCMC.R.
source("../MCMC.R")
Then I define the target function that the MCMCs will sample from:
# Target (unnormalized) density to sample from: an n-dimensional Gaussian
# with independent components, where sigma_i = i and center_i = n - i + 1.
#
# Args:
#   theta: numeric vector of length n, the point at which to evaluate.
# Returns:
#   prod_i exp(-(theta_i - center_i)^2 / sigma_i^2); 1 for a zero-length
#   theta (the empty product), where the original loop misbehaved.
posterior_g_inhom <- function(theta) {
  n <- length(theta)
  if (n == 0L) {
    return(1)  # empty product
  }
  sigmas <- seq_len(n)   # 1, 2, ..., n
  centers <- seq(n, 1)   # n, n-1, ..., 1
  # Vectorized replacement of the original element-wise accumulation loop.
  prod(exp(-(theta - centers)^2 / sigmas^2))
}
# Select the target density shared by every MCMC run below.
chosen_function <- posterior_g_inhom
Then I only need to set the initialization parameters — the "hyperparameters" of the simulations:
# Hyper-parameters shared by all the simulation runs
init <- c(4, 4, 4)             # common starting point of every chain
std <- diag(1, 3)              # initial proposal covariance: 3x3 identity
N <- as.integer(1e5)           # samples kept after burn-in
burn_in <- as.integer(1e4)     # samples discarded as burn-in
# N <- as.integer(1e4)
# burn_in <- as.integer(1e3)
print_step <- as.integer(1e2)  # step passed to show_results for plotting
# print_init <- as.integer(1e3)
N_tot <- N + burn_in           # total number of samples to draw per chain
# For Haario: regularization constant of the adaptive covariance
epsilon <- 0.001
# MVTNORM
# Run the Metropolis MCMC with a fixed multivariate-normal proposal
mcmc_g <- random_steps_mvtnorm(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE)
# Discard the burn-in: keep the N rows that follow the first burn_in ones.
# (The previous burn_in:N slice kept a burn-in row and dropped the last
# burn_in rows of the chain instead.)
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
# Plotting the results
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.42 %
# MVTNORM GIBBS
# Same fixed-proposal sampler, Gibbs variant (presumably component-wise
# updates — semantics defined in MCMC.R)
mcmc_g <- random_steps_mvtnorm_gibbs(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 60.00545 %
# # SIMPLE ADAPTIVE
# mcmc_g = random_steps_simple (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# # SIMPLE ADAPTIVE GIBBS
# mcmc_g = random_steps_simple_gibbs (func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept=TRUE, t_0 = burn_in,
# gamma_function = gamma_series_exp, halved_step = burn_in)
# mcmc_g = mcmc_g[burn_in:N, ]
# show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
# HAARIO
# Haario-style adaptive Metropolis; t_0 and eps are forwarded to the
# sampler (their exact semantics are defined in MCMC.R)
mcmc_g <- random_steps_haario(func_wanted = chosen_function, theta_init = init, n_samples = N_tot,
                              sigma = std, print_accept = TRUE, t_0 = burn_in, eps = epsilon)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 13.41636 %
Final mean = 3.002492 2.002584 0.9645224
Final covariance matrix =
[,1] [,2] [,3]
[1,] 31.72016 20.761742 10.559135
[2,] 20.76174 17.599919 7.176388
[3,] 10.55914 7.176388 11.388420
# HAARIO GIBBS
# Gibbs variant of the Haario adaptive sampler (see MCMC.R)
mcmc_g <- random_steps_haario_gibbs(func_wanted = chosen_function, theta_init = init, n_samples = N_tot,
                                    sigma = std, print_accept = TRUE, t_0 = burn_in, eps = epsilon)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 16.34182 %
Final mean = 3.002133 2.051001 1.011137
Final covariance matrix =
[,1] [,2] [,3]
[1,] 32.35706 21.091632 10.423934
[2,] 21.09163 17.479107 6.947936
[3,] 10.42393 6.947936 10.029906
# RAO
# Adaptive-Metropolis (Rao-Blackwellized) run; the adaptation schedule is
# driven by gamma_series_exp with its step halved at burn_in/2 (see MCMC.R)
mcmc_g <- random_steps_AM_rao(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE, t_0 = burn_in,
                              gamma_function = gamma_series_exp, halved_step = burn_in / 2)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 49.54636 %
Final mean = 3.011292 2.01277 0.9713684
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.50078136 -0.01833289 -0.01819649
[2,] -0.01833289 1.97279339 0.06893830
[3,] -0.01819649 0.06893830 4.39504015
# RAO GIBBS
# Gibbs variant of the Rao adaptive sampler (see MCMC.R)
mcmc_g <- random_steps_AM_rao_gibbs(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE, t_0 = burn_in,
                                    gamma_function = gamma_series_exp, halved_step = burn_in / 2)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 61.75091 %
Final mean = 2.99597 2.012553 0.9214468
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.596878067 -0.003576676 -0.008336312
[2,] -0.003576676 1.806679781 0.017996751
[3,] -0.008336312 0.017996751 3.935594945
# GLOBAL
# Globally-scaled adaptive run (also reports a final lambda scale factor;
# see MCMC.R for the exact algorithm)
mcmc_g <- random_steps_global(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE, t_0 = burn_in,
                              gamma_function = gamma_series_exp, halved_step = burn_in)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 35.74545 %
Final mean = 2.958152 2.000933 1.084931
Final lambda = 1.135289
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.54389412 0.04502161 -0.01670756
[2,] 0.04502161 1.85401636 0.10235503
[3,] -0.01670756 0.10235503 4.69468794
# GLOBAL GIBBS
# Gibbs variant of the globally-scaled adaptive sampler (see MCMC.R)
mcmc_g <- random_steps_global_gibbs(func_wanted = chosen_function, theta_init = init, n_samples = N_tot, sigma = std, print_accept = TRUE, t_0 = burn_in,
                                    gamma_function = gamma_series_exp, halved_step = burn_in)
# Discard the burn-in: keep the N rows after the first burn_in ones
# (fixes the burn_in:N slice, which dropped the chain's tail).
mcmc_g <- mcmc_g[(burn_in + 1):N_tot, ]
show_results(mcmc = mcmc_g, init = init, std = std, step = print_step)
Acceptance rate = 42.69364 %
Final mean = 2.992296 1.958158 0.7991791
Final lambda = 0.8549626
Final covariance matrix =
[,1] [,2] [,3]
[1,] 0.56116625 -0.01347894 0.02853076
[2,] -0.01347894 1.88785668 0.08385695
[3,] 0.02853076 0.08385695 3.76797181